import matplotlib.pyplot as plt
from skimage import io
from matplotlib import cm
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
import os, PIL, pathlib, math
from imgaug import augmenters as iaa
from PIL import Image
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
%matplotlib inline
# Synchronous data-parallel training: mirror model variables across all
# visible GPUs on this machine (the captured output shows 4 replicas).
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
def show_grid(image_list, nrows, ncols, label_list=None, show_labels=False,
              savename=None, figsize=(10, 10), showaxis='off'):
    """Display a grid of images with optional per-image class-name titles.

    Parameters
    ----------
    image_list : list of 2-D/3-D arrays, or a 4-D batch array
        A 4-D array of shape (N, H, W, 1) or (N, H, W, 3) is split into a
        list of per-image arrays. Pixel values are assumed to be in [0, 1]
        (they are multiplied by 255 before display).
    nrows, ncols : int
        Grid dimensions; the first nrows*ncols images are shown.
    label_list : sequence of int, optional
        Integer class labels, one per image, mapped to names via the
        module-level ``class_mapping`` dict. Falls back to the module-level
        ``y_int`` when omitted (preserves the original global-based behavior).
    show_labels : bool
        When True, set each axis title to the mapped class name.
    savename : str, optional
        When given, the figure is also written to this path.
    figsize : tuple
        Matplotlib figure size in inches.
    showaxis : str or bool
        Passed to ``ax.axis`` — default 'off' hides the axes.
    """
    if not isinstance(image_list, list):
        # Split a 4-D batch into a list; drop the trailing channel axis for
        # single-channel (grayscale) batches so imshow gets 2-D arrays.
        if image_list.shape[-1] == 1:
            image_list = [image_list[i, :, :, 0] for i in range(image_list.shape[0])]
        elif image_list.shape[-1] == 3:
            image_list = [image_list[i, :, :, :] for i in range(image_list.shape[0])]
    # Fix: the original ignored label_list and always read the global y_int.
    # Honor the parameter when provided; keep the global as a fallback so
    # existing callers that relied on it still work.
    if label_list is None:
        label_list = y_int
    fig = plt.figure(None, figsize, frameon=False)
    grid = ImageGrid(fig, 111,                  # similar to subplot(111)
                     nrows_ncols=(nrows, ncols),
                     axes_pad=0.3,              # pad between axes in inch.
                     share_all=True,
                     )
    for i in range(nrows * ncols):
        ax = grid[i]
        # The AxesGrid object works as a list of axes.
        ax.imshow((image_list[i] * 255).astype(np.uint8), cmap='Greys_r')
        # Fix: honor the showaxis parameter (was hard-coded to 'off').
        ax.axis(showaxis)
        if show_labels:
            ax.set_title(class_mapping[label_list[i]])
    if savename is not None:
        plt.savefig(savename, bbox_inches='tight')
# Defining Image augmentors
# In-graph Keras augmentation pipeline (active only during training passes):
data_augmentation = keras.Sequential([
    layers.experimental.preprocessing.RandomRotation(0.2),
    layers.experimental.preprocessing.RandomTranslation(
        0.2, 0.2, fill_mode='reflect', interpolation='bilinear'),
])
# imgaug augmenters — defined here but currently disabled: every call in
# add_aug() below is commented out, so none of these touch the data.
aug3 = iaa.MotionBlur(k=5, angle=[-90, 90])            # directional motion blur
aug4 = iaa.CoarseSaltAndPepper((0,0.02), size_px=(2, 8))  # coarse salt & pepper noise
aug5 = iaa.AdditiveGaussianNoise(scale=(0, 0.05*255))  # additive Gaussian noise
aug6 = iaa.Sharpen(alpha=(0,0.05), lightness=1.0)      # mild sharpening
aug7 = iaa.Dropout(p=(0, 0.05))                        # random pixel dropout
aug8 = iaa.LinearContrast((0.9, 1.0))                  # slight contrast change
def add_aug(image):
    """Per-image hook for ImageDataGenerator's ``preprocessing_function``.

    All imgaug augmenters are currently disabled (commented out), so this is
    an identity pass-through: the input image is returned unchanged.
    Uncomment individual lines below to re-enable specific augmentations.
    """
    #image = aug3.augment_image(image)
    #image = aug4.augment_image(image)
    #image = aug5.augment_image(image)
    #image = aug6.augment_image(image)
    #image = aug7.augment_image(image)
    #image = aug8.augment_image(image)
    return image
# Defining the three models in functions
def simpleModel():
    """Build and compile a small 3-conv-block CNN baseline.

    Includes the in-graph data_augmentation pipeline; the model emits raw
    logits (no final softmax), so the loss uses from_logits=True. Reads the
    module-level ``num_classes``.
    """
    model = Sequential()
    model.add(data_augmentation)
    #model.add(layers.experimental.preprocessing.Rescaling(1./255))
    # Three conv/pool stages with doubling filter counts.
    for n_filters in (16, 32, 64):
        model.add(layers.Conv2D(n_filters, 3, padding='same', activation='relu'))
        model.add(layers.MaxPooling2D())
    model.add(layers.Dropout(0.2))
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(num_classes))  # logits
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'],
    )
    return model
def AlexNet():  # batch 32, 224, 0.002, 0.01, True
    """Build and compile an AlexNet-style CNN (with BatchNorm after each conv).

    Emits raw logits; loss uses from_logits=True. Reads the module-level
    ``num_classes``. Expects ~227x227 inputs for the classic geometry.
    """
    model = Sequential()
    #model.add(data_augmentation)
    #model.add(layers.experimental.preprocessing.Rescaling(1./255))
    # Stage 1: large-kernel stride-4 conv, then overlapping max-pool.
    model.add(layers.Conv2D(96, 11, strides=4, padding='valid', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=3, strides=2))
    # Stage 2.
    model.add(layers.Conv2D(256, 5, strides=1, padding='same', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=3, strides=2))
    # Stage 3: three 3x3 convs back to back.
    for n_filters in (384, 384, 256):
        model.add(layers.Conv2D(n_filters, 3, strides=1, padding='same', activation='relu'))
        model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=3, strides=2))
    # Classifier head: two wide FC layers with heavy dropout.
    model.add(layers.Flatten())
    model.add(layers.Dense(4096, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(4096, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(num_classes))  # logits
    opt = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.01, nesterov=True)
    model.compile(
        optimizer=opt,
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'],
    )
    return model
def VGG16():  # batch 64, 224, 0.002, 0.01, True
    """Build and compile a VGG16-style CNN.

    Five conv blocks (2-2-3-3-3 convs with 64/128/256/512/512 filters), each
    followed by a 2x2 max-pool, then two 4096-unit FC layers. Emits raw
    logits; loss uses from_logits=True. Reads the module-level ``num_classes``.
    """
    model = Sequential()
    #model.add(data_augmentation)
    #model.add(layers.experimental.preprocessing.Rescaling(1./255))
    # (filters, number of 3x3 convs) per VGG block.
    vgg_plan = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
    for n_filters, n_convs in vgg_plan:
        for _ in range(n_convs):
            model.add(layers.Conv2D(n_filters, 3, strides=1, padding='same',
                                    activation='relu'))
        # MaxPooling2D stride defaults to pool_size, which is what VGG wants.
        model.add(layers.MaxPooling2D(pool_size=2))
    model.add(layers.Flatten())
    model.add(layers.Dense(4096, activation='relu'))
    model.add(layers.Dense(4096, activation='relu'))
    model.add(layers.Dense(num_classes))  # logits
    opt = tf.keras.optimizers.SGD(learning_rate=0.02, momentum=0.2, nesterov=True)
    model.compile(
        optimizer=opt,
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'],
    )
    return model
# Dataset configuration.
num_classes = 7
batch_size = 64
img_height = 180 # 180, 227, 224
img_width = 180 # 180, 227, 224
data_dir = pathlib.Path('takaFrames_dataset/')
#image_count = len(list(data_dir.glob('*/*.jpg')))
#print(image_count)
print("\n")
# One generator serves both splits: pixels rescaled to [0, 1], 10% held out
# for validation, and add_aug (currently an identity) applied per image.
datagen_args = dict(rescale=1./255, validation_split=0.1, preprocessing_function=add_aug)
datagen = ImageDataGenerator(**datagen_args)
# Training split — class_mode defaults to 'categorical' (one-hot labels).
train_ds = datagen.flow_from_directory(
    data_dir,
    subset="training",
    target_size=(img_height, img_width),
    batch_size=batch_size)
print("\n")
# Validation split from the same directory/generator.
val_ds = datagen.flow_from_directory(
    data_dir,
    subset="validation",
    target_size=(img_height, img_width),
    batch_size=batch_size)
print("\n")
print(train_ds.class_indices)
# Invert name->index into index->name for labeling plots.
class_mapping = {v:k for k,v in train_ds.class_indices.items()}
# Pull one batch to sanity-check shapes: x is (batch, H, W, 3), y is one-hot.
x,y = next(train_ds)
print('x: ',type(x))
print('y: ',type(y))
print('x: ',x.shape)
print('y: ',y.shape)
print("\n")
# Integer class labels for the sample batch, used for grid titles.
y_int = np.argmax(y,axis=-1)
show_grid(x,4,8,label_list=y_int,show_labels=True,figsize=(20,10))
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1', '/job:localhost/replica:0/task:0/device:GPU:2', '/job:localhost/replica:0/task:0/device:GPU:3')
Number of devices: 4
Found 43694 images belonging to 7 classes.
Found 4851 images belonging to 7 classes.
{'Children': 0, 'Cosmetics': 1, 'FashionAccessories': 2, 'Household': 3, 'Ladies': 4, 'Men': 5, 'Sports': 6}
x: <class 'numpy.ndarray'>
y: <class 'numpy.ndarray'>
x: (64, 180, 180, 3)
y: (64, 7)
# Build (and compile) the model inside the strategy scope so its variables
# are mirrored across all replicas, as MirroredStrategy requires.
with strategy.scope():
    model = simpleModel()
# Keep the best full model (by validation accuracy) seen during training.
checkpoint = ModelCheckpoint("checkpoint.h5", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')
# Stop early if validation accuracy stalls for 5 consecutive epochs.
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=5, verbose=1, mode='auto')
epochs = 15
# NOTE(review): validation_steps=5 evaluates only 5*batch_size = 320 of the
# ~4851 validation images each epoch — confirm this subsampling is intended.
history = model.fit(train_ds,
                    validation_data=val_ds,
                    validation_steps=5,
                    epochs=epochs, # 100 seems to be enough to achieve >90% accuracies
                    callbacks=[checkpoint,early]
                    )
# Plotting results — accuracy and loss curves for train and validation.
plt.plot(history.history["accuracy"])
plt.plot(history.history['val_accuracy'])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title("model accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","loss","Validation Loss"])
plt.show()
WARNING:tensorflow:Using MirroredStrategy eagerly has significant overhead currently. We will be working on improving this in the future, but for now please wrap `call_for_each_replica` or `experimental_run` or `experimental_run_v2` inside a tf.function to get the best performance.
Epoch 1/15
WARNING:tensorflow:From /home/mdl-ws/environments/env0/lib/python3.8/site-packages/tensorflow/python/data/ops/multi_device_iterator_ops.py:601: get_next_as_optional (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Iterator.get_next_as_optional()` instead.
INFO:tensorflow:batch_all_reduce: 10 all-reduces with algorithm = nccl, num_packs = 1
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:batch_all_reduce: 10 all-reduces with algorithm = nccl, num_packs = 1
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
683/683 [==============================] - ETA: 0s - loss: 1.3718 - accuracy: 0.4833INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
Epoch 00001: val_accuracy improved from -inf to 0.60000, saving model to checkpoint.h5
683/683 [==============================] - 98s 143ms/step - loss: 1.3718 - accuracy: 0.4833 - val_loss: 1.1797 - val_accuracy: 0.6000
Epoch 2/15
683/683 [==============================] - ETA: 0s - loss: 0.7133 - accuracy: 0.7411
Epoch 00002: val_accuracy did not improve from 0.60000
683/683 [==============================] - 95s 139ms/step - loss: 0.7133 - accuracy: 0.7411 - val_loss: 1.3155 - val_accuracy: 0.5906
Epoch 3/15
683/683 [==============================] - ETA: 0s - loss: 0.4640 - accuracy: 0.8342
Epoch 00003: val_accuracy improved from 0.60000 to 0.71250, saving model to checkpoint.h5
683/683 [==============================] - 95s 139ms/step - loss: 0.4640 - accuracy: 0.8342 - val_loss: 0.9636 - val_accuracy: 0.7125
Epoch 4/15
683/683 [==============================] - ETA: 0s - loss: 0.3405 - accuracy: 0.8803
Epoch 00004: val_accuracy improved from 0.71250 to 0.85312, saving model to checkpoint.h5
683/683 [==============================] - 95s 139ms/step - loss: 0.3405 - accuracy: 0.8803 - val_loss: 0.5506 - val_accuracy: 0.8531
Epoch 5/15
683/683 [==============================] - ETA: 0s - loss: 0.2570 - accuracy: 0.9097
Epoch 00005: val_accuracy did not improve from 0.85312
683/683 [==============================] - 95s 139ms/step - loss: 0.2570 - accuracy: 0.9097 - val_loss: 0.5571 - val_accuracy: 0.8344
Epoch 6/15
683/683 [==============================] - ETA: 0s - loss: 0.1997 - accuracy: 0.9306
Epoch 00006: val_accuracy did not improve from 0.85312
683/683 [==============================] - 95s 139ms/step - loss: 0.1997 - accuracy: 0.9306 - val_loss: 0.6435 - val_accuracy: 0.8125
Epoch 7/15
683/683 [==============================] - ETA: 0s - loss: 0.1661 - accuracy: 0.9423
Epoch 00007: val_accuracy did not improve from 0.85312
683/683 [==============================] - 96s 140ms/step - loss: 0.1661 - accuracy: 0.9423 - val_loss: 1.0240 - val_accuracy: 0.7188
Epoch 8/15
683/683 [==============================] - ETA: 0s - loss: 0.1510 - accuracy: 0.9476
Epoch 00008: val_accuracy did not improve from 0.85312
683/683 [==============================] - 96s 140ms/step - loss: 0.1510 - accuracy: 0.9476 - val_loss: 1.0016 - val_accuracy: 0.8094
Epoch 9/15
683/683 [==============================] - ETA: 0s - loss: 0.1409 - accuracy: 0.9507
Epoch 00009: val_accuracy improved from 0.85312 to 0.86250, saving model to checkpoint.h5
683/683 [==============================] - 96s 140ms/step - loss: 0.1409 - accuracy: 0.9507 - val_loss: 0.5024 - val_accuracy: 0.8625
Epoch 10/15
683/683 [==============================] - ETA: 0s - loss: 0.1182 - accuracy: 0.9589
Epoch 00010: val_accuracy did not improve from 0.86250
683/683 [==============================] - 95s 139ms/step - loss: 0.1182 - accuracy: 0.9589 - val_loss: 0.8655 - val_accuracy: 0.8000
Epoch 11/15
683/683 [==============================] - ETA: 0s - loss: 0.1177 - accuracy: 0.9603
Epoch 00011: val_accuracy improved from 0.86250 to 0.86563, saving model to checkpoint.h5
683/683 [==============================] - 96s 141ms/step - loss: 0.1177 - accuracy: 0.9603 - val_loss: 0.4923 - val_accuracy: 0.8656
Epoch 12/15
683/683 [==============================] - ETA: 0s - loss: 0.1032 - accuracy: 0.9651
Epoch 00012: val_accuracy did not improve from 0.86563
683/683 [==============================] - 96s 140ms/step - loss: 0.1032 - accuracy: 0.9651 - val_loss: 1.0761 - val_accuracy: 0.7500
Epoch 13/15
683/683 [==============================] - ETA: 0s - loss: 0.0987 - accuracy: 0.9666
Epoch 00013: val_accuracy did not improve from 0.86563
683/683 [==============================] - 96s 140ms/step - loss: 0.0987 - accuracy: 0.9666 - val_loss: 0.9546 - val_accuracy: 0.7844
Epoch 14/15
683/683 [==============================] - ETA: 0s - loss: 0.0966 - accuracy: 0.9670
Epoch 00014: val_accuracy did not improve from 0.86563
683/683 [==============================] - 96s 141ms/step - loss: 0.0966 - accuracy: 0.9670 - val_loss: 0.4892 - val_accuracy: 0.8406
Epoch 15/15
683/683 [==============================] - ETA: 0s - loss: 0.0860 - accuracy: 0.9709
Epoch 00015: val_accuracy did not improve from 0.86563
683/683 [==============================] - 96s 141ms/step - loss: 0.0860 - accuracy: 0.9709 - val_loss: 0.5433 - val_accuracy: 0.8313
# Testing the generalisability of the Takashimaya Ngee Ann City trained model
# against images from Isetan Shaw Departmental store.
test_dir = pathlib.Path('Isetan45_testSet/')
# Listing the training directory again only to recover the class-name order;
# image_dataset_from_directory sorts class names alphabetically, matching the
# alphabetical ordering of train_ds.class_indices.
classes_ds = tf.keras.preprocessing.image_dataset_from_directory('takaFrames_dataset/')
class_names = classes_ds.class_names
imgs_only = os.listdir(test_dir)
for img_name in imgs_only:
    img_path = os.path.join(test_dir, img_name)
    print(img_path)
    # load_img's target_size is (height, width); the original passed
    # (img_width, img_height) — harmless while both are 180, but now correct.
    img = keras.preprocessing.image.load_img(img_path, target_size=(img_height, img_width))
    plt.imshow(img)
    arr = keras.preprocessing.image.img_to_array(img)
    # Bug fix: training inputs were rescaled by 1/255 in ImageDataGenerator
    # (rescale=1./255), but inference previously fed raw [0, 255] pixels —
    # which explains the uniformly wrong, 100%-confident predictions. Apply
    # the same scaling so the model sees the input distribution it trained on.
    img_array = tf.expand_dims(arr / 255.0, 0)
    predictions = model.predict(img_array)
    # The model outputs logits; convert to probabilities for reporting.
    score = tf.nn.softmax(predictions[0])
    plt.show()
    print("Likely {} with {:.2f}% confidence.\n\n".format(class_names[np.argmax(score)], 100 * np.max(score)))
Found 48545 files belonging to 7 classes. Isetan45_testSet/Cosmetics_3.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Accesories_4.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Men_1.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Children_4.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Cosmetics_2.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Household_1.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Household_3.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Sports_4.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Cosmetics_1.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Sports_1.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Men_5.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Ladies_2.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Children_3.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Accesories_2.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Children_5.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Sports_5.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Household_2.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Sports_2.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Cosmetics_4.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Ladies_4.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Accesories_3.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Household_5.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Cosmetics_5.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Children_1.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Children_2.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Household_4.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Ladies_3.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Sports_3.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Ladies_5.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Accesories_5.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Ladies_1.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Men_3.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Men_4.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Accesories_1.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Men_2.JPG
Likely Sports with 100.00% confidence.